From: kaf24@firebug.cl.cam.ac.uk
Date: Mon, 3 Oct 2005 18:14:02 +0000 (+0100)
Subject: Create new vcpu_op() hypercall. Replaces old boot_vcpu()
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~16763^2~56^2~11

Create new vcpu_op() hypercall. Replaces old boot_vcpu()
hypercall and vcpu-related schedop commands.

Signed-off-by: Keir Fraser
---

diff --git a/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c b/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c
index 38940bbea6..ee5d05ab7b 100644
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c
@@ -49,6 +49,7 @@
 #include
 #include
 #include
+#include
 #ifdef CONFIG_MATH_EMULATION
 #include
 #endif
@@ -178,7 +179,7 @@ void cpu_idle (void)
 			   don't printk. */
 			__get_cpu_var(cpu_state) = CPU_DEAD;
 			/* Tell hypervisor to take vcpu down. */
-			HYPERVISOR_vcpu_down(cpu);
+			HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
 #endif
 			play_dead();
 			local_irq_enable();
diff --git a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c
index 7fe4d8da07..c3e11cc0c2 100644
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c
@@ -63,6 +63,7 @@
 #include
 #include
+#include

 /* Set if we find a B stepping CPU */
 static int __initdata smp_b_stepping;
@@ -882,11 +883,13 @@ static int __init do_boot_cpu(int apicid)

 	ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;

-	boot_error = HYPERVISOR_boot_vcpu(cpu, &ctxt);
+	boot_error = HYPERVISOR_vcpu_op(VCPUOP_create, cpu, &ctxt);
 	if (boot_error)
 		printk("boot error: %ld\n", boot_error);

 	if (!boot_error) {
+		HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
+
 		/*
 		 * allow APs to start initializing.
 		 */
@@ -1499,7 +1502,7 @@ int __devinit __cpu_up(unsigned int cpu)
 #ifdef CONFIG_HOTPLUG_CPU
 #ifdef CONFIG_XEN
 	/* Tell hypervisor to bring vcpu up. */
-	HYPERVISOR_vcpu_up(cpu);
+	HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
 #endif
 	/* Already up, and in cpu_quiescent now? */
 	if (cpu_isset(cpu, smp_commenced_mask)) {
@@ -1621,5 +1624,6 @@ void vcpu_prepare(int vcpu)

 	ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;

-	(void)HYPERVISOR_boot_vcpu(vcpu, &ctxt);
+	(void)HYPERVISOR_vcpu_op(VCPUOP_create, vcpu, &ctxt);
+	(void)HYPERVISOR_vcpu_op(VCPUOP_up, vcpu, NULL);
 }
diff --git a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c
index a48b7aa469..86a050bba1 100644
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c
@@ -62,8 +62,8 @@
 #include
 #ifdef CONFIG_XEN
 #include
-
 #include
+#include
 #endif

 /* Change for real CPU hotplug. Note other files need to be fixed
@@ -771,11 +771,13 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)

 	ctxt.ctrlreg[3] = virt_to_mfn(init_level4_pgt) << PAGE_SHIFT;

-	boot_error = HYPERVISOR_boot_vcpu(cpu, &ctxt);
+	boot_error = HYPERVISOR_vcpu_op(VCPUOP_create, cpu, &ctxt);
 	if (boot_error)
 		printk("boot error: %ld\n", boot_error);

 	if (!boot_error) {
+		HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
+
 		/*
 		 * allow APs to start initializing.
 		 */

diff --git a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h
index 6bdcb310ff..4509c065b3 100644
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h
@@ -316,26 +316,10 @@ HYPERVISOR_vm_assist(
 }

 static inline int
-HYPERVISOR_boot_vcpu(
-    unsigned long vcpu, vcpu_guest_context_t *ctxt)
+HYPERVISOR_vcpu_op(
+    int cmd, int vcpuid, void *extra_args)
 {
-    return _hypercall2(int, boot_vcpu, vcpu, ctxt);
-}
-
-static inline int
-HYPERVISOR_vcpu_up(
-    int vcpu)
-{
-    return _hypercall2(int, sched_op, SCHEDOP_vcpu_up |
-                       (vcpu << SCHEDOP_vcpushift), 0);
-}
-
-static inline int
-HYPERVISOR_vcpu_pickle(
-    int vcpu, vcpu_guest_context_t *ctxt)
-{
-    return _hypercall2(int, sched_op, SCHEDOP_vcpu_pickle |
-                       (vcpu << SCHEDOP_vcpushift), ctxt);
+    return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
 }

 static inline int
@@ -357,24 +341,6 @@ HYPERVISOR_suspend(
     return ret;
 }

-static inline int
-HYPERVISOR_vcpu_down(
-    int vcpu)
-{
-    int ret;
-    unsigned long ign1;
-    /* Yes, I really do want to clobber edx here: when we resume a
-       vcpu after unpickling a multi-processor domain, it returns
-       here, but clobbers all of the call clobbered registers. */
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1)
-        : "0" (__HYPERVISOR_sched_op),
-          "1" (SCHEDOP_vcpu_down | (vcpu << SCHEDOP_vcpushift))
-        : "memory", "ecx", "edx" );
-    return ret;
-}
-
 #endif /* __HYPERCALL_H__ */

 /*
diff --git a/linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypercall.h b/linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypercall.h
index b3c1079277..f2b785143b 100644
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypercall.h
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypercall.h
@@ -601,24 +601,6 @@ HYPERVISOR_vm_assist(
     return 1;
 }

-static inline int
-HYPERVISOR_boot_vcpu(
-    unsigned long vcpu, vcpu_guest_context_t *ctxt)
-{
-#if 0
-    int ret;
-    unsigned long ign1, ign2;
-
-    __asm__ __volatile__ (
-        TRAP_INSTR
-        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-        : "0" (__HYPERVISOR_boot_vcpu), "1" (vcpu), "2" (ctxt)
-        : "memory");
-
-    return ret;
-#endif
-    return 1;
-}
 #endif

 #endif /* __HYPERCALL_H__ */
diff --git a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h
index 11e869b730..3421a300e5 100644
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h
@@ -302,26 +302,10 @@ HYPERVISOR_vm_assist(
 }

 static inline int
-HYPERVISOR_boot_vcpu(
-    unsigned long vcpu, vcpu_guest_context_t *ctxt)
+HYPERVISOR_vcpu_op(
+    int cmd, int vcpuid, void *extra_args)
 {
-    return _hypercall2(int, boot_vcpu, vcpu, ctxt);
-}
-
-static inline int
-HYPERVISOR_vcpu_up(
-    int vcpu)
-{
-    return _hypercall2(int, sched_op, SCHEDOP_vcpu_up |
-                       (vcpu << SCHEDOP_vcpushift), 0);
-}
-
-static inline int
-HYPERVISOR_vcpu_pickle(
-    int vcpu, vcpu_guest_context_t *ctxt)
-{
-    return _hypercall2(int, sched_op, SCHEDOP_vcpu_pickle |
-                       (vcpu << SCHEDOP_vcpushift), ctxt);
+    return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
 }

 static inline int
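For context, a minimal guest-side sketch (not part of this patch) of how the new wrapper is meant to be used when bringing up a secondary CPU, mirroring the do_boot_cpu() hunks above. It assumes the hypercall.h/vcpu.h headers touched by this patch are included, and uses a hypothetical cpu_initialize_context() helper standing in for the context set-up that smpboot.c performs:

    /* Illustrative sketch only -- not part of this patch. */
    static int bring_up_vcpu(int cpu)
    {
        vcpu_guest_context_t ctxt;
        int rc;

        memset(&ctxt, 0, sizeof(ctxt));
        cpu_initialize_context(cpu, &ctxt);  /* hypothetical helper */

        /* Register the VCPU with its initial register/pagetable state... */
        rc = HYPERVISOR_vcpu_op(VCPUOP_create, cpu, &ctxt);
        if (rc)
            return rc;

        /* ...then make it runnable. */
        return HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
    }
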
diff --git a/xen/arch/x86/x86_32/entry.S b/xen/arch/x86/x86_32/entry.S
index 1fee8f010b..31bd7b372f 100644
--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -808,7 +808,7 @@ ENTRY(hypercall_table)
        .long do_vm_assist
        .long do_update_va_mapping_otherdomain
        .long do_switch_vm86
-       .long do_boot_vcpu
+       .long do_vcpu_op
        .long do_ni_hypercall       /* 25 */
        .long do_mmuext_op
        .long do_acm_op             /* 27 */
@@ -841,7 +841,7 @@ ENTRY(hypercall_args_table)
        .byte 2 /* do_vm_assist */
        .byte 5 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_switch_vm86 */
-       .byte 2 /* do_boot_vcpu */
+       .byte 3 /* do_vcpu_op */
        .byte 0 /* do_ni_hypercall */      /* 25 */
        .byte 4 /* do_mmuext_op */
        .byte 1 /* do_acm_op */
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index a696b65dfe..10be586c78 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -629,7 +629,7 @@ ENTRY(hypercall_table)
        .quad do_vm_assist
        .quad do_update_va_mapping_otherdomain
        .quad do_switch_to_user
-       .quad do_boot_vcpu
+       .quad do_vcpu_op
        .quad do_set_segment_base   /* 25 */
        .quad do_mmuext_op
        .quad do_acm_op
@@ -662,7 +662,7 @@ ENTRY(hypercall_args_table)
        .byte 2 /* do_vm_assist */
        .byte 4 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_switch_to_user */
-       .byte 2 /* do_boot_vcpu */
+       .byte 3 /* do_vcpu_op */
        .byte 2 /* do_set_segment_base */  /* 25 */
        .byte 4 /* do_mmuext_op */
        .byte 1 /* do_acm_op */
diff --git a/xen/common/domain.c b/xen/common/domain.c
index eaea4bcc9d..a4969811ee 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include

 /* Both these structures are protected by the domlist_lock. */
 rwlock_t domlist_lock = RW_LOCK_UNLOCKED;
@@ -366,37 +367,17 @@ int set_info_guest(struct domain *d, dom0_setdomaininfo_t *setdomaininfo)
     return rc;
 }

-/*
- * final_setup_guest is used for final setup and launching of domains other
- * than domain 0. ie. the domains that are being built by the userspace dom0
- * domain builder.
- */
-long do_boot_vcpu(unsigned long vcpu, struct vcpu_guest_context *ctxt)
+int boot_vcpu(struct domain *d, int vcpuid, struct vcpu_guest_context *ctxt)
 {
-    struct domain *d = current->domain;
     struct vcpu *v;
-    int rc = 0;
-    struct vcpu_guest_context *c;
+    int rc;

-    if ( (vcpu >= MAX_VIRT_CPUS) || (d->vcpu[vcpu] != NULL) )
-        return -EINVAL;
+    ASSERT(d->vcpu[vcpuid] == NULL);

-    if ( alloc_vcpu_struct(d, vcpu) == NULL )
+    if ( alloc_vcpu_struct(d, vcpuid) == NULL )
         return -ENOMEM;

-    if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
-    {
-        rc = -ENOMEM;
-        goto out;
-    }
-
-    if ( copy_from_user(c, ctxt, sizeof(*c)) )
-    {
-        rc = -EFAULT;
-        goto out;
-    }
-
-    v = d->vcpu[vcpu];
+    v = d->vcpu[vcpuid];

     atomic_set(&v->pausecnt, 0);
     v->cpumap = CPUMAP_RUNANYWHERE;
@@ -405,22 +386,73 @@ long do_boot_vcpu(unsigned long vcpu, struct vcpu_guest_context *ctxt)

     arch_do_boot_vcpu(v);

-    if ( (rc = arch_set_info_guest(v, c)) != 0 )
+    if ( (rc = arch_set_info_guest(v, ctxt)) != 0 )
         goto out;

     sched_add_domain(v);

-    /* domain_unpause_by_systemcontroller */
-    if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
-        vcpu_wake(v);
+    set_bit(_VCPUF_down, &v->vcpu_flags);
+    clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);

-    xfree(c);
     return 0;

 out:
-    xfree(c);
-    arch_free_vcpu_struct(d->vcpu[vcpu]);
-    d->vcpu[vcpu] = NULL;
+    arch_free_vcpu_struct(d->vcpu[vcpuid]);
+    d->vcpu[vcpuid] = NULL;
+    return rc;
+}
+
+long do_vcpu_op(int cmd, int vcpuid, void *arg)
+{
+    struct domain *d = current->domain;
+    struct vcpu *v;
+    struct vcpu_guest_context *ctxt;
+    long rc = 0;
+
+    if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
+        return -EINVAL;
+
+    if ( ((v = d->vcpu[vcpuid]) == NULL) && (cmd != VCPUOP_create) )
+        return -ENOENT;
+
+    switch ( cmd )
+    {
+    case VCPUOP_create:
+        if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
+        {
+            rc = -ENOMEM;
+            break;
+        }
+
+        if ( copy_from_user(ctxt, arg, sizeof(*ctxt)) )
+        {
+            xfree(ctxt);
+            rc = -EFAULT;
+            break;
+        }
+
+        LOCK_BIGLOCK(d);
+        rc = (d->vcpu[vcpuid] == NULL) ? boot_vcpu(d, vcpuid, ctxt) : -EEXIST;
+        UNLOCK_BIGLOCK(d);
+
+        xfree(ctxt);
+        break;
+
+    case VCPUOP_up:
+        if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
+            vcpu_wake(v);
+        break;
+
+    case VCPUOP_down:
+        if ( !test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
+            vcpu_sleep_nosync(v);
+        break;
+
+    case VCPUOP_is_up:
+        rc = !test_bit(_VCPUF_down, &v->vcpu_flags);
+        break;
+    }
+
+    return rc;
 }
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 77e0055e8c..fe6de36988 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -270,69 +270,6 @@ static long do_yield(void)
     return 0;
 }

-/* Mark target vcpu as non-runnable so it is not scheduled */
-static long do_vcpu_down(int vcpu)
-{
-    struct vcpu *target;
-
-    if ( vcpu > MAX_VIRT_CPUS )
-        return -EINVAL;
-
-    target = current->domain->vcpu[vcpu];
-    if ( target == NULL )
-        return -ESRCH;
-    set_bit(_VCPUF_down, &target->vcpu_flags);
-
-    return 0;
-}
-
-/* Mark target vcpu as runnable and wake it */
-static long do_vcpu_up(int vcpu)
-{
-    struct vcpu *target;
-
-    if (vcpu > MAX_VIRT_CPUS)
-        return -EINVAL;
-
-    target = current->domain->vcpu[vcpu];
-    if ( target == NULL )
-        return -ESRCH;
-    clear_bit(_VCPUF_down, &target->vcpu_flags);
-    /* wake vcpu */
-    vcpu_wake(target);
-
-    return 0;
-}
-
-static long do_vcpu_pickle(int vcpu, unsigned long arg)
-{
-    struct vcpu *v;
-    vcpu_guest_context_t *c;
-    int ret = 0;
-
-    if (vcpu >= MAX_VIRT_CPUS)
-        return -EINVAL;
-    v = current->domain->vcpu[vcpu];
-    if (!v)
-        return -ESRCH;
-    /* Don't pickle vcpus which are currently running */
-    if (!test_bit(_VCPUF_down, &v->vcpu_flags)) {
-        return -EBUSY;
-    }
-    c = xmalloc(vcpu_guest_context_t);
-    if (!c)
-        return -ENOMEM;
-    arch_getdomaininfo_ctxt(v, c);
-    if (copy_to_user((vcpu_guest_context_t *)arg,
-                     (const vcpu_guest_context_t *)c, sizeof(*c)))
-        ret = -EFAULT;
-    xfree(c);
-    return ret;
-}
-
-/*
- * Demultiplex scheduler-related hypercalls.
- */
 long do_sched_op(unsigned long op, unsigned long arg)
 {
     long ret = 0;
@@ -359,21 +296,6 @@ long do_sched_op(unsigned long op, unsigned long arg)
             domain_shutdown((u8)(op >> SCHEDOP_reasonshift));
             break;
         }
-        case SCHEDOP_vcpu_down:
-        {
-            ret = do_vcpu_down((int)(op >> SCHEDOP_vcpushift));
-            break;
-        }
-        case SCHEDOP_vcpu_up:
-        {
-            ret = do_vcpu_up((int)(op >> SCHEDOP_vcpushift));
-            break;
-        }
-        case SCHEDOP_vcpu_pickle:
-        {
-            ret = do_vcpu_pickle((int)(op >> SCHEDOP_vcpushift), arg);
-            break;
-        }

         default:
             ret = -ENOSYS;
@@ -395,8 +317,8 @@ long do_set_timer_op(s_time_t timeout)
     return 0;
 }

-/** sched_id - fetch ID of current scheduler */
-int sched_id()
+/* sched_id - fetch ID of current scheduler */
+int sched_id(void)
 {
     return ops.sched_id;
 }
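The SCHEDOP_vcpu_down/up/pickle sub-commands removed above are superseded by the VCPUOP_* commands defined in the new public/vcpu.h header that follows. As a rough guest-side sketch (again, not part of this patch and assuming the same headers), the CPU-offline path from the process.c hunk at the top of this patch reduces to the VCPU taking itself down, with other CPUs able to query its state via VCPUOP_is_up:

    /* Illustrative sketch only -- not part of this patch. */
    static void vcpu_take_self_down(int cpu)
    {
        /* Called on the VCPU being offlined; per the VCPUOP_down notes
         * below, the operation is synchronous when self-invoked. */
        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
    }

    static int vcpu_is_running(int cpu)
    {
        /* 1 if the VCPU is up, 0 if down, negative errno if it has
         * not been created. */
        return HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);
    }
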
diff --git a/xen/include/public/vcpu.h b/xen/include/public/vcpu.h
new file mode 100644
index 0000000000..c034f3f59e
--- /dev/null
+++ b/xen/include/public/vcpu.h
@@ -0,0 +1,55 @@
+/******************************************************************************
+ * vcpu.h
+ *
+ * VCPU creation and hotplug.
+ *
+ * Copyright (c) 2005, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_VCPU_H__
+#define __XEN_PUBLIC_VCPU_H__
+
+/*
+ * Prototype for this hypercall is:
+ *  int vcpu_op(int cmd, int vcpuid, void *extra_args)
+ * @cmd        == VCPUOP_??? (VCPU operation).
+ * @vcpuid     == VCPU to operate on.
+ * @extra_args == Operation-specific extra arguments (NULL if none).
+ */
+
+/*
+ * Create a new VCPU. This must be called before a VCPU can be referred to
+ * in any other hypercall (e.g., to bind event channels). The new VCPU
+ * will not run until it is brought up by VCPUOP_up.
+ *
+ * @extra_arg == pointer to vcpu_guest_context structure containing initial
+ *               state for the new VCPU.
+ */
+#define VCPUOP_create   0
+
+/*
+ * Bring up a newly-created or previously brought-down VCPU. This makes the
+ * VCPU runnable.
+ */
+#define VCPUOP_up       1
+
+/*
+ * Bring down a VCPU (i.e., make it non-runnable).
+ * There are a few caveats that callers should observe:
+ *  1. This operation may return, and VCPU_is_up may return false, before the
+ *     VCPU stops running (i.e., the command is asynchronous). It is a good
+ *     idea to ensure that the VCPU has entered a non-critical loop before
+ *     bringing it down. Alternatively, this operation is guaranteed
+ *     synchronous if invoked by the VCPU itself.
+ *  2. After a VCPU is created, there is currently no way to drop all its
+ *     references to domain memory. Even a VCPU that is down still holds
+ *     memory references via its pagetable base pointer and GDT. It is good
+ *     practise to move a VCPU onto an 'idle' or default page table, LDT and
+ *     GDT before bringing it down.
+ */
+#define VCPUOP_down     2
+
+/* Returns 1 if the given VCPU is up. */
+#define VCPUOP_is_up    3
+
+#endif /* __XEN_PUBLIC_VCPU_H__ */
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index d891566a4e..4775299773 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -55,7 +55,7 @@
 #define __HYPERVISOR_update_va_mapping_otherdomain 22
 #define __HYPERVISOR_switch_vm86          23 /* x86/32 only */
 #define __HYPERVISOR_switch_to_user       23 /* x86/64 only */
-#define __HYPERVISOR_boot_vcpu            24
+#define __HYPERVISOR_vcpu_op              24
 #define __HYPERVISOR_set_segment_base     25 /* x86/64 only */
 #define __HYPERVISOR_mmuext_op            26
 #define __HYPERVISOR_acm_op               27
@@ -201,12 +201,8 @@ struct mmuext_op {
 #define SCHEDOP_yield           0   /* Give up the CPU voluntarily.          */
 #define SCHEDOP_block           1   /* Block until an event is received.     */
 #define SCHEDOP_shutdown        2   /* Stop executing this domain.           */
-#define SCHEDOP_vcpu_down       3   /* make target VCPU not-runnable.        */
-#define SCHEDOP_vcpu_up         4   /* make target VCPU runnable.            */
-#define SCHEDOP_vcpu_pickle     5   /* save a vcpu's context to memory.      */
 #define SCHEDOP_cmdmask       255   /* 8-bit command.                        */
 #define SCHEDOP_reasonshift     8   /* 8-bit reason code. (SCHEDOP_shutdown) */
-#define SCHEDOP_vcpushift       8   /* 8-bit VCPU target. (SCHEDOP_up|down)  */

 /*
  * Reason codes for SCHEDOP_shutdown. These may be interpreted by control